Conv2dTranspose ================= 计算二维转置卷积,可以视为 Conv2d 对输入求梯度,也称为反卷积(实际不是真正的反卷积)。 输入的 shape 通常为 :math:`(N, H_{in}, W_{in}, C_{in})`,其中: - :math:`N` 是 batch size - :math:`C_{in}` 是通道数 - :math:`H_{in}, W_{in}` 分别为特征层的高度和宽度 输入: - **input_x** - 输入数据的地址 - **input_w** - 输入卷积核权重的地址 - **bias** - 输入偏置的地址 - **param** - 算子计算所需参数的结构体。其各成员见下述。 - **core_mask** - 核掩码。 **ConvTransposeParameter定义:** .. code-block:: c :linenos: typedef struct ConvTransposeParameter { void* workspace_; // 用于存放中间计算结果 int output_batch_; // 输出数据总批次 int input_batch_; // 输入数据总批次 int input_h_; // 输入数据h维度大小 int input_w_; // 输入数据w维度大小 int output_h_; // 输出数据h维度大小 int output_w_; // 输出数据w维度大小 int input_channel_; // 输入数据通道数 int output_channel_; // 输出数据通道数 int kernel_h_; // 卷积核h维度大小 int kernel_w_; // 卷积核w维度大小 int group_; // 组数 int pad_l_; // 左填充大小 int pad_u_; // 上填充大小 int dilation_h_; // 卷积核h维度膨胀尺寸大小 int dilation_w_; // 卷积核w维度膨胀尺寸大小 int stride_h_; // 卷积核h维度步长 int stride_w_; // 卷积核w维度步长 int buffer_size_; // 为分块计算所分配的缓存大小 } ConvTransposeParameter; 输出: - **out_y** - 输出地址。 支持平台: ``FT78NE`` ``MT7004`` .. note:: - FT78NE 支持int8, fp32 - MT7004 支持fp16, fp32 **共享存储版本:** .. c:function:: void i8_convtranspose_s(int8_t* input_x, int8_t* input_w, int8_t* out_y, int* bias, ConvTransposeParameter *conv_param, int core_mask) .. c:function:: void hp_convtranspose_s(half* input_x, half* input_w, half* out_y, half* bias, ConvTransposeParameter *conv_param, int core_mask) .. c:function:: void fp_convtranspose_s(float* input_x, float* input_w, float* out_y, float* bias, ConvTransposeParameter *conv_param, int core_mask) **C调用示例:** .. 
code-block:: c :linenos: :emphasize-lines: 35 void TestConvTransposeSMCFp32(int* input_shape, int* weight_shape, int* output_shape, int* stride, int* padding, int* dilation, int groups, float* bias, int core_mask) { int core_id = get_core_id(); int logic_core_id = GetLogicCoreId(core_mask, core_id); int core_num = GetCoreNum(core_mask); float* input_data = (float*)0x88000000; float* weight = (float*)0x89000000; float* output_data = (float*)0x90000000; float* bias_data = (float*)0x91000000; float* check = (float*)0x94000000; ConvTransposeParameter* param = (ConvTransposeParameter*)0x92000000; if (logic_core_id == 0) { memcpy(bias_data, bias, sizeof(float) * output_shape[3]); memset(output_data, 0, output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * sizeof(float)); memset(check, 0, output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * sizeof(float)); param->dilation_h_ = dilation[0]; param->dilation_w_ = dilation[1]; param->group_ = groups; param->input_batch_ = input_shape[0]; param->input_h_ = input_shape[1]; param->input_w_ = input_shape[2]; param->input_channel_ = input_shape[3]; param->kernel_h_ = weight_shape[1]; param->kernel_w_ = weight_shape[2]; param->output_batch_ = output_shape[0]; param->output_h_ = output_shape[1]; param->output_w_ = output_shape[2]; param->output_channel_ = output_shape[3]; param->stride_h_ = stride[0]; param->stride_w_ = stride[1]; param->pad_u_ = padding[0]; param->pad_l_ = padding[2]; param->workspace_ = (float*)0xA0000000; } sys_bar(0, core_num); // 初始化参数完成后进行同步 fp_convtranspose_s(input_data, weight, output_data, bias_data, param, core_mask); } void main(){ int in_channel = 6; int out_channel = 6; int groups = 6; int input_shape[4] = {2, 5, 7, in_channel}; // NHWC int weight_shape[4] = {in_channel, 3, 3, out_channel / groups}; int output_shape[4] = {2, 7, 9, out_channel}; // NHWC int stride[2] = {1, 1}; int padding[4] = {0, 0, 0, 0}; int dilation[2]= {1, 1}; float bias[] = {0, 0, 0, 0, 0, 0}; 
int core_mask = 0b1111; TestConvTransposeSMCFp32(input_shape, weight_shape, output_shape, stride, padding, dilation, groups, bias, core_mask); } **私有存储版本:** .. c:function:: void i8_convtranspose_p(int8_t* input_x, int8_t* input_w, int8_t* out_y, int* bias, ConvTransposeParameter *conv_param, int core_mask) .. c:function:: void hp_convtranspose_p(half* input_x, half* input_w, half* out_y, half* bias, ConvTransposeParameter *conv_param, int core_mask) .. c:function:: void fp_convtranspose_p(float* input_x, float* input_w, float* out_y, float* bias, ConvTransposeParameter *conv_param, int core_mask) **C调用示例:** .. code-block:: c :linenos: :emphasize-lines: 30 void TestConvTransposeL2Fp32(int* input_shape, int* weight_shape, int* output_shape, int* stride, int* padding, int* dilation, int groups, float* bias, int core_mask) { float* input_data = (float*)0x10000000; // 私有存储版本地址设置在AM内 float* weight = (float*)0x10001000; float* output_data = (float*)0x10002000; float* bias_data = (float*)0x10003000; float* check = (float*)0x10004000; ConvTransposeParameter* param = (ConvTransposeParameter*)0x10005000; memcpy(bias_data, bias, sizeof(float) * output_shape[3]); memset(output_data, 0, output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * sizeof(float)); memset(check, 0, output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * sizeof(float)); param->dilation_h_ = dilation[0]; param->dilation_w_ = dilation[1]; param->group_ = groups; param->input_batch_ = input_shape[0]; param->input_h_ = input_shape[1]; param->input_w_ = input_shape[2]; param->input_channel_ = input_shape[3]; param->kernel_h_ = weight_shape[1]; param->kernel_w_ = weight_shape[2]; param->output_batch_ = output_shape[0]; param->output_h_ = output_shape[1]; param->output_w_ = output_shape[2]; param->output_channel_ = output_shape[3]; param->stride_h_ = stride[0]; param->stride_w_ = stride[1]; param->pad_u_ = padding[0]; param->pad_l_ = padding[2]; param->workspace_ = 
(float*)0x10006000; param->buffer_size_ = 1024; // 私有存储版本中,必须设置该参数,用于确定分块计算的大小 fp_convtranspose_p(input_data, weight, output_data, bias_data, param, core_mask); } void main(){ int in_channel = 6; int out_channel = 6; int groups = 6; int input_shape[4] = {2, 5, 7, in_channel}; // NHWC int weight_shape[4] = {in_channel, 3, 3, out_channel / groups}; int output_shape[4] = {2, 7, 9, out_channel}; // NHWC int stride[2] = {1, 1}; int padding[4] = {0, 0, 0, 0}; int dilation[2]= {1, 1}; float bias[] = {0, 0, 0, 0, 0, 0}; int core_mask = 0b0001; // 私有存储版本只能设置为一个核心启动 TestConvTransposeL2Fp32(input_shape, weight_shape, output_shape, stride, padding, dilation, groups, bias, core_mask); }